for ( i = 0; i < PFN_UP(sizeof(struct vcpu_guest_context)); ++i )
{
- struct page_info *pg = alloc_domheap_page(NULL, 0);
+ struct page_info *pg = alloc_domheap_page(current->domain,
+ MEMF_no_owner);
if ( unlikely(pg == NULL) )
{
l4_pgentry_t *l4tab;
int rc;
- pg = alloc_domheap_page(NULL, MEMF_node(vcpu_to_node(v)));
+ pg = alloc_domheap_page(v->domain, MEMF_no_owner);
if ( pg == NULL )
return -ENOMEM;
}
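
The pattern this series applies throughout: allocations that were node-blind (NULL, 0) or pinned to a single node via MEMF_node() now pass a domain pointer together with MEMF_no_owner, so the allocator can place the page according to that domain's full node-affinity mask without assigning ownership of it. current->domain is used where the allocation is made on behalf of the invoking domain, v->domain or d where an explicit pointer is in scope. One nuance in the second hunk: vcpu_to_node(v) previously pinned to the vCPU's home node, while the new form falls back across the whole domain mask. Below is a minimal sketch of the allocator side this relies on, assuming the usual page_alloc.c internals; the zone bounds and the exact alloc_heap_pages()/assign_pages()/free_heap_pages() signatures are abridged here, not quoted:

struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int memflags)
{
    struct page_info *pg;

    /*
     * Placement: a non-NULL @d contributes its node-affinity mask even
     * when MEMF_no_owner is set; an explicit MEMF_node() request still
     * takes precedence over the mask.
     */
    pg = alloc_heap_pages(zone_lo, zone_hi, order, memflags, d);
    if ( pg == NULL )
        return NULL;

    /* Ownership: only owned allocations go through assign_pages(). */
    if ( d != NULL && !(memflags & MEMF_no_owner) &&
         assign_pages(d, pg, order, memflags) )
    {
        free_heap_pages(pg, order);
        return NULL;
    }

    return pg;
}

Freeing stays symmetric: pages allocated this way keep a NULL owner, so free_domheap_page() takes the owner-less path with no per-domain accounting to undo.
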
else
{
- page = alloc_domheap_page(NULL, 0);
+ page = alloc_domheap_page(d, MEMF_no_owner);
if ( !page )
panic("Not enough RAM for domain 0 PML4");
page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
break;
}
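
This hunk is evidently the dom0 builder: d is the domain under construction, so its PML4 now comes from dom0's node affinity to whatever extent that is set up this early, and the panic() on failure remains appropriate, since at this point there is no caller to hand an -ENOMEM back to.
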
- page = alloc_domheap_page(NULL, 0);
+ page = alloc_domheap_page(current->domain, MEMF_no_owner);
if ( !page )
{
ret = -ENOMEM;
for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
{
- pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( pg == NULL )
break;
s->vram_page[i] = pg;
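
Apparently the emulated VGA's vram_page[] pool. This hunk shows the second half of the pattern: MEMF_node(domain_to_node(d)) collapsed the domain's affinity to one node, whereas passing d lets the allocator try the entire node_affinity mask before giving up, which matters for guests spanning several nodes; the partial-failure handling (break out, keep what was allocated) is unchanged. A hypothetical caller that really does want one specific node can still ask for it, since an explicit request takes precedence:

    pg = alloc_domheap_page(d, MEMF_no_owner | MEMF_node(node));
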
int vlapic_init(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
- unsigned int memflags = MEMF_node(vcpu_to_node(v));
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
if ( vlapic->regs_page == NULL )
{
- vlapic->regs_page = alloc_domheap_page(NULL, memflags);
+ vlapic->regs_page = alloc_domheap_page(v->domain, MEMF_no_owner);
if ( vlapic->regs_page == NULL )
{
dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
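
With the allocation now taking v->domain directly, the memflags local in vlapic_init() would have had no remaining user, so removing it rather than leaving dead code behind is correct.
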
l3_pgentry_t *l3tab;
l2_pgentry_t *l2tab;
l1_pgentry_t *l1tab;
- unsigned int memf = MEMF_node(domain_to_node(d));
int rc = 0;
ASSERT(va >= PERDOMAIN_VIRT_START &&
if ( !d->arch.perdomain_l3_pg )
{
- pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( !pg )
return -ENOMEM;
l3tab = __map_domain_page(pg);
if ( !(l3e_get_flags(l3tab[l3_table_offset(va)]) & _PAGE_PRESENT) )
{
- pg = alloc_domheap_page(NULL, memf);
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( !pg )
{
unmap_domain_page(l3tab);
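
These per-domain mapping pages are Xen's own bookkeeping rather than guest memory, so MEMF_no_owner is exactly the right tool here: placement follows d, but the pages never enter d's page list or count against its allocation.
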
{
if ( pl1tab && !IS_NIL(pl1tab) )
{
- l1tab = alloc_xenheap_pages(0, memf);
+ l1tab = alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
if ( !l1tab )
{
rc = -ENOMEM;
}
else
{
- pg = alloc_domheap_page(NULL, memf);
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( !pg )
{
rc = -ENOMEM;
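
Note the asymmetry in this function: alloc_xenheap_pages() takes no domain pointer, so the one xenheap allocation cannot use MEMF_no_owner and instead open-codes the MEMF_node(domain_to_node(d)) that the deleted memf local used to carry. That path still pins to a single node, which seems tolerable for a lone metadata page.
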
if ( ppg &&
!(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
{
- pg = alloc_domheap_page(NULL, memf);
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( pg )
{
clear_domain_page(page_to_mfn(pg));
if ( d->arch.paging.hap.total_pages < pages )
{
/* Need to allocate more memory from domheap */
- pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ pg = alloc_domheap_page(d, MEMF_no_owner);
if ( pg == NULL )
{
HAP_PRINTK("failed to allocate hap pages.\n");
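
The HAP pool appears to be accounted through its own counters (total_pages above) rather than through assign_pages(), so these pages were never owned in the first place; the change only adds NUMA awareness to how the pool grows.
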
{
/* Need to allocate more memory from domheap */
sp = (struct page_info *)
- alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ alloc_domheap_page(d, MEMF_no_owner);
if ( sp == NULL )
{
SHADOW_PRINTK("failed to allocate shadow pages.\n");
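
Same treatment for the shadow pool. Incidentally, the (struct page_info *) cast above is redundant, since alloc_domheap_page() already returns struct page_info *; a follow-up could drop it together with the line wrap it forces.
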